static void build_page_list(struct task_struct *p)
{
+ /*
+ * Write the pfn of every page owned by domain @p into a list that is
+ * stored inside the domain's own pages. Only one page of the list is
+ * mapped at a time via map_domain_mem()/unmap_domain_mem().
+ *
+ * @p: domain whose page list is being built; p->pg_head is the head of
+ * its list_head chain through frame_table.
+ */
- unsigned long * list;
+ unsigned long *list;
 unsigned long curr;
- unsigned long page;
 struct list_head *list_ent;
- list = (unsigned long *)map_domain_mem(p->pg_head << PAGE_SHIFT);
- curr = page = p->pg_head;
- do {
- *list++ = page;
- list_ent = frame_table[page].list.next;
- page = list_entry(list_ent, struct pfn_info, list) - frame_table;
- if( !((unsigned long)list & (PAGE_SIZE-1)) )
+ /* 'curr' tracks the pfn of the domain page currently mapped as the
+ * output buffer; start with the first page on the domain's list. */
+ curr = list_entry(p->pg_head.next, struct pfn_info, list) - frame_table;
+ list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
+
+ list_for_each(list_ent, &p->pg_head)
+ {
+ /* pfn = frame_table index of this list entry. */
+ *list++ = list_entry(list_ent, struct pfn_info, list) - frame_table;
+
+ /* Output cursor crossed a page boundary: move the buffer to the
+ * next page in the domain's chain. 'list-1' still points into the
+ * old mapping, so it is a valid address to unmap. */
+ if( ((unsigned long)list & ~PAGE_MASK) == 0 )
 {
- list_ent = frame_table[curr].list.next;
- curr = list_entry(list_ent, struct pfn_info, list) - frame_table;
+ struct list_head *ent = frame_table[curr].list.next;
+ curr = list_entry(ent, struct pfn_info, list) - frame_table;
 unmap_domain_mem(list-1);
 list = (unsigned long *)map_domain_mem(curr << PAGE_SHIFT);
 }
 }
- while ( page != p->pg_head );
+
 unmap_domain_mem(list);
}
pro = (pro+1) % smp_num_cpus;
p->processor = pro;
- /* if we are not booting dom 0 than only mem
- * needs to be allocated
- */
- if(dom != 0){
-
- if(alloc_new_dom_mem(p, op.u.newdomain.memory_kb) != 0){
- ret = -1;
- break;
- }
- build_page_list(p);
-
- ret = p->domain;
-
- op.u.newdomain.domain = ret;
- op.u.newdomain.pg_head = p->pg_head;
- copy_to_user(u_dom0_op, &op, sizeof(op));
+ if ( dom == 0 ) BUG();
- break;
- }
+ ret = alloc_new_dom_mem(p, op.u.newdomain.memory_kb);
+ if ( ret != 0 ) break;
- /* executed only in case of domain 0 */
- ret = setup_guestos(p, &op.u.newdomain); /* Load guest OS into @p */
- if ( ret != 0 )
- {
- p->state = TASK_DYING;
- release_task(p);
- break;
- }
- wake_up(p); /* Put @p on runqueue */
- reschedule(p); /* Force a scheduling decision on @p's CPU */
+ build_page_list(p);
+
ret = p->domain;
+
+ op.u.newdomain.domain = ret;
+ op.u.newdomain.pg_head =
+ list_entry(p->pg_head.next, struct pfn_info, list) -
+ frame_table;
+ copy_to_user(u_dom0_op, &op, sizeof(op));
}
break;
#include <asm/msr.h>
#include <xeno/multiboot.h>
-#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
-#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
extern int nr_mods;
extern module_t *mod;
*/
p->blk_ring_base = (blk_ring_t *)(p->shared_info + 1);
p->net_ring_base = (net_ring_t *)(p->blk_ring_base + 1);
- p->pg_head = p->tot_pages = 0;
+ INIT_LIST_HEAD(&p->pg_head);
+ p->tot_pages = 0;
write_lock_irqsave(&tasklist_lock, flags);
SET_LINKS(p);
write_unlock_irqrestore(&tasklist_lock, flags);
/* Release resources belonging to task @p. */
void release_task(struct task_struct *p)
{
+ struct list_head *list_ent, *tmp;
+
ASSERT(!__task_on_runqueue(p));
ASSERT(p->state == TASK_DYING);
ASSERT(!p->has_cpu);
}
if ( p->mm.perdomain_pt ) free_page((unsigned long)p->mm.perdomain_pt);
free_page((unsigned long)p->shared_info);
- if ( p->tot_pages != 0 )
+
+ list_for_each_safe(list_ent, tmp, &p->pg_head)
{
- /* Splice domain's pages into the free list. */
- struct list_head *first = &frame_table[p->pg_head].list;
- struct list_head *last = first->prev;
- free_list.next->prev = last;
- last->next = free_list.next;
- free_list.next = first;
- first->prev = &free_list;
- free_pfns += p->tot_pages;
+ struct pfn_info *pf = list_entry(list_ent, struct pfn_info, list);
+ pf->type_count = pf->tot_count = pf->flags = 0;
+ list_del(list_ent);
+ list_add(list_ent, &free_list);
}
+
free_task_struct(p);
}
unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
{
struct list_head *temp;
- struct pfn_info *pf, *pf_head;
+ struct pfn_info *pf;
unsigned int alloc_pfns;
unsigned int req_pages;
req_pages = kbytes >> (PAGE_SHIFT - 10);
/* is there enough mem to serve the request? */
- if(req_pages > free_pfns)
- return -1;
+ if ( req_pages > free_pfns ) return -1;
/* allocate pages and build a thread through frame_table */
temp = free_list.next;
-
- /* allocate first page */
- pf = pf_head = list_entry(temp, struct pfn_info, list);
- pf->flags |= p->domain;
- temp = temp->next;
- list_del(&pf->list);
- INIT_LIST_HEAD(&pf->list);
- p->pg_head = pf - frame_table;
- pf->type_count = pf->tot_count = 0;
- free_pfns--;
-
- /* allocate the rest */
- for ( alloc_pfns = req_pages - 1; alloc_pfns; alloc_pfns-- )
+ for ( alloc_pfns = 0; alloc_pfns < req_pages; alloc_pfns++ )
{
pf = list_entry(temp, struct pfn_info, list);
pf->flags |= p->domain;
+ pf->type_count = pf->tot_count = 0;
temp = temp->next;
list_del(&pf->list);
-
- list_add_tail(&pf->list, &pf_head->list);
- pf->type_count = pf->tot_count = 0;
-
+ list_add_tail(&pf->list, &p->pg_head);
free_pfns--;
}
static unsigned long alloc_page_from_domain(unsigned long * cur_addr,
 unsigned long * index)
{
+ /*
+ * Hand out the page at *cur_addr (taken from the tail end of the
+ * domain's allocation) and step the cursor back to the previous frame
+ * in the domain's list, decrementing the remaining-page counter.
+ *
+ * Fix: the old code returned the *updated* *cur_addr (the previous
+ * frame) instead of the page actually being allocated, skipping a
+ * page on every call. Capture the address before advancing.
+ */
- struct list_head *ent = frame_table[*cur_addr >> PAGE_SHIFT].list.prev;
+ unsigned long ret = *cur_addr;
+ struct list_head *ent = frame_table[ret >> PAGE_SHIFT].list.prev;
 *cur_addr = list_entry(ent, struct pfn_info, list) - frame_table;
 *cur_addr <<= PAGE_SHIFT;
 (*index)--;
- return *cur_addr;
+ return ret;
}
/* setup_guestos is used for building dom0 solely. other domains are built in
}
if ( alloc_new_dom_mem(p, params->memory_kb) ) return -ENOMEM;
- alloc_address = p->pg_head << PAGE_SHIFT;
+ alloc_address = list_entry(p->pg_head.prev, struct pfn_info, list) -
+ frame_table;
+ alloc_address <<= PAGE_SHIFT;
alloc_index = p->tot_pages;
if ( (mod[nr_mods-1].mod_end-mod[0].mod_start) >
*/
l2tab += l2_table_offset(virt_load_address);
- cur_address = p->pg_head << PAGE_SHIFT;
+ cur_address = list_entry(p->pg_head.next, struct pfn_info, list) -
+ frame_table;
+ cur_address <<= PAGE_SHIFT;
for ( count = 0; count < p->tot_pages + 1; count++ )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
unmap_domain_mem(l1start);
/* pages that are part of page tables must be read only */
- cur_address = p->pg_head << PAGE_SHIFT;
+ cur_address = list_entry(p->pg_head.next, struct pfn_info, list) -
+ frame_table;
+ cur_address <<= PAGE_SHIFT;
for ( count = 0; count < alloc_index; count++ )
{
list_ent = frame_table[cur_address >> PAGE_SHIFT].list.next;